%matplotlib inline
from IPython.display import display, HTML
import numpy as np
import pandas as pd
import chardet as cd
# Path to the sourcedata event file for this subject/session/task.
init_dir = '/Users/jsein/Documents/Centre_IRMf/DATA/BIDS/BRAINT'
event_path = init_dir+'/sourcedata/sub-011/ses-02/sub-011_ses-02_task-03ArchiLocalizer_23_05_23_15_22.txt'
# Determine the encoding with chardet, since the file encoding is not standard,
# then read the file ONCE with the detected encoding.
# (The original read the file a first time with a hard-coded 'mac_roman'
# encoding into an unused variable `event_input`; that redundant read is removed.)
with open(event_path, 'rb') as f:
    result = cd.detect(f.read())  # or f.readline() if the file is large
print(result)
df_orig = pd.read_csv(event_path, encoding=result['encoding'], delimiter='\t')
print(df_orig)
# Build a DataFrame of events (condition, onset, duration) by merging
# consecutive rows that share the same condition into blocks.
# Times are converted to seconds for nilearn.
df_ev = pd.DataFrame()
# previous condition, used to detect block boundaries
prev_cond = ''
# initialize duration and onsets (sentinel values, overwritten on first row)
duration = -1.0
onset = -1.0
onset2 = -1.0
# iterate over the relevant columns row by row
for l in df_orig[['CONDITIONS', 'ONSETS_MS', 'ONSETS_TRIGGERS', 'DURATIONS']].iterrows():
    # get values
    _, (cur_cond, cur_ons2, cur_ons, cur_dur) = l
    # same condition as previous row: extend the current block
    if cur_cond==prev_cond:
        duration += cur_dur
    else:
        # close the previous block: convert onset and duration from triggers
        # to seconds (1 trigger = 80 ms) and onset2 from ms to s
        if not prev_cond=='':
            df_tmp = pd.DataFrame({'condition': prev_cond, 'onset2': onset2 / 1000.0, 'onset': onset * 0.08, 'duration': duration * 0.08}, [0])
            df_ev = pd.concat((df_ev, df_tmp), ignore_index=True)
        # move to next block
        prev_cond = cur_cond
        onset2 = cur_ons2
        onset = cur_ons
        duration = cur_dur
# BUG FIX: the last block was never appended, because a block was only flushed
# when the condition changed; flush the final block here after the loop.
if prev_cond != '':
    df_tmp = pd.DataFrame({'condition': prev_cond, 'onset2': onset2 / 1000.0, 'onset': onset * 0.08, 'duration': duration * 0.08}, [0])
    df_ev = pd.concat((df_ev, df_tmp), ignore_index=True)
# check if onsets are similar in ms and trigger
df_ev
# sanity check: list every condition code present in the events
print(np.unique(df_ev['condition']))
# Mapping from integer condition code (the values stored in df_ev['condition'])
# to a human-readable trial-type label: index i is condition i.
condition_ids = [
    "rest", #"rest" will be condition 0
    "vertical checkerboard", #"vertical checkerboard" will be condition 1
    "horizontal checkerboard", # ...
    "left button press, visual instructions",
    "left button press, auditory instructions",
    "right button press, visual instructions",
    "right button press, auditory instructions",
    "mental computation, visual instructions",
    "mental computation, auditory instructions",
    "visual sentence",
    "auditory sentence",
]
# translate each condition code into its label
trial_types = [condition_ids[code] for code in df_ev['condition']]
print(trial_types)
# events table in the layout nilearn expects: trial_type, onset, duration
events = pd.DataFrame({
    "trial_type": trial_types,
    "onset": df_ev['onset'],
    "duration": df_ev['duration'],
})
display(events)
from pathlib import Path
local_dir='/Users/jsein/Documents/Centre_IRMf/DATA/BIDS/BRAINT'
outdir = Path(local_dir+'/derivatives/'+"results")
# parents=True also creates 'derivatives' if missing; exist_ok replaces the
# race-prone exists()/mkdir() pair of the original.
outdir.mkdir(parents=True, exist_ok=True)
tsvfile = outdir / "localizer_events.tsv"
# write the BIDS-style events file (tab-separated, no index column)
events.to_csv(tsvfile, sep="\t", index=False)
print(f"The event information has been saved to {tsvfile}")
import matplotlib.pyplot as plt
from nilearn.plotting import plot_event
# visual sanity check of the event timeline
plot_event(events, figsize=(15, 5))
plt.show()
# Load the fmriprep confounds table and assemble the nuisance regressors:
# 24-parameter motion model + 24 aCompCor components.
confounds_file = pd.read_csv(
    init_dir + '/derivatives/fmriprep/sub-011/ses-02/func/sub-011_ses-02_task-03ArchiLocalizer_desc-confounds_timeseries.tsv',
    sep='\t')
# 6 rigid-body motion parameters
motion6 = confounds_file[["rot_x", "rot_y", "rot_z", "trans_x", "trans_y", "trans_z"]]
# 24-parameter motion model: parameters + temporal derivatives + squares
# + squared derivatives
motion24 = confounds_file[["rot_x","rot_y","rot_z","trans_x","trans_y","trans_z","rot_x_derivative1","rot_y_derivative1","rot_z_derivative1",
                "trans_x_derivative1","trans_y_derivative1","trans_z_derivative1","rot_x_power2","rot_y_power2","rot_z_power2",
                "trans_x_power2","trans_y_power2","trans_z_power2","rot_x_derivative1_power2","rot_y_derivative1_power2","rot_z_derivative1_power2",
                "trans_x_derivative1_power2","trans_y_derivative1_power2","trans_z_derivative1_power2"]]
# 12 "w_comp_cor" + 12 "c_comp_cor" aCompCor components.
# (Presumably WM- and CSF-mask components -- the naming is specific to this
# dataset; TODO confirm against the confounds file header.)
# The original built this list with two copy-pasted loops whose second
# comment wrongly still said 'w_comp_cor_'.
acompcor = [f"w_comp_cor_{i:02d}" for i in range(12)] + \
           [f"c_comp_cor_{i:02d}" for i in range(12)]
acompcor_reg = confounds_file[acompcor]
# The first row of every derivative column is NaN (no previous volume).
# fillna on a fresh frame replaces the original's chained-assignment write
# (which needed pd.options.mode.chained_assignment = None to stay quiet).
motion24 = motion24.fillna(0)
# The original also aliased this as `result = ...`, clobbering the chardet
# `result` from the loading cell; the alias is dropped.
nuisance = pd.concat([motion24, acompcor_reg], axis=1)
display(nuisance)
from nilearn.glm.first_level import FirstLevelModel,make_first_level_design_matrix
# BUG FIX: plot_img is used below but was never imported anywhere in the script.
from nilearn.plotting import plot_img
import numpy as np
import json
import os
import nibabel as nib
# preprocessed BOLD series, its BIDS sidecar, and the brain mask
json_file=init_dir+"/derivatives/fmriprep/sub-011/ses-02/func/sub-011_ses-02_task-03ArchiLocalizer_space-MNI152NLin2009cAsym_desc-preproc_bold.json"
fmri_data=init_dir+"/derivatives/fmriprep/sub-011/ses-02/func/sub-011_ses-02_task-03ArchiLocalizer_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz"
# NOTE(review): this mask comes from task-07BodyLocalizer while the BOLD data
# is task-03ArchiLocalizer -- confirm the cross-task mask is intentional.
mask_fmri_data=init_dir+"/derivatives/fmriprep/sub-011/ses-02/func/sub-011_ses-02_task-07BodyLocalizer_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz"
img = nib.load(fmri_data)
# repetition time (s) from the BIDS sidecar
with open(json_file, 'r') as f:
    t_r = json.load(f)['RepetitionTime']
# first-level GLM: AR(1) noise model, 4 mm smoothing, SPM HRF,
# cosine drift model with a 0.01 Hz high-pass cutoff
fmri_glm = FirstLevelModel(
    t_r=t_r,
    noise_model="ar1",
    smoothing_fwhm=4,
    standardize=False,
    mask_img=mask_fmri_data,
    hrf_model="spm",
    drift_model="cosine",
    high_pass=0.01,
)
n_scans = img.shape[3]  # number of volumes in the run
frame_times = np.arange(n_scans) * t_r  # onset time of each scan (s)
# reference EPI used as background image for plots
ref_fmri_data=init_dir+"/derivatives/fmriprep/sub-011/ses-02/func/sub-011_ses-02_task-03ArchiLocalizer_space-MNI152NLin2009cAsym_boldref.nii.gz"
plot_img(ref_fmri_data, colorbar=True, cbar_tick_format="%i")
# fit the GLM with the task events and the 48 nuisance regressors
fmri_glm = fmri_glm.fit(fmri_data, events, nuisance)
import matplotlib.pyplot as plt
from nilearn.plotting import plot_design_matrix
# design matrix of the (single) fitted run
design_matrix = fmri_glm.design_matrices_[0]
plot_design_matrix(design_matrix)
plt.show()
design_matrix.shape[1]
### Save the design matrix as an image
import os
outdir = Path(local_dir+'/derivatives/'+"results")
# exist_ok replaces the race-prone exists()/mkdir() pair of the original
os.makedirs(outdir, exist_ok=True)
from os.path import join
plot_design_matrix(
    design_matrix, output_file=join(outdir, "design_matrix_new.png")
)
# BUG FIX: design-matrix condition columns are named after the trial_type
# strings, which contain a space ("auditory sentence"), not an underscore;
# the original indexed "auditory_sentence" and would raise a KeyError.
plt.plot(design_matrix["auditory sentence"])
plt.xlabel("scan")
plt.title("Expected auditory sentence Response")
plt.show()
from nilearn.plotting import plot_contrast_matrix
# One zero-filled contrast vector (length = number of design-matrix columns)
# per experimental condition.
# BUG FIX 1: the original dict listed "left button press, visual instructions"
# twice; the duplicate key silently overwrote the first entry and is removed.
# BUG FIX 2: the original hard-coded indices 0 and 1, assuming
# "auditory sentence" and "vertical checkerboard" are the first two columns.
# nilearn orders the condition columns alphabetically (see the warning later
# in this script), so index 1 is "horizontal checkerboard"; the column
# positions are now looked up by name instead.
condition_names = [
    "rest",
    "vertical checkerboard",
    "horizontal checkerboard",
    "left button press, visual instructions",
    "left button press, auditory instructions",
    "right button press, visual instructions",
    "right button press, auditory instructions",
    "mental computation, visual instructions",
    "mental computation, auditory instructions",
    "visual sentence",
    "auditory sentence",
]
conditions = {name: np.zeros(design_matrix.shape[1]) for name in condition_names}
conditions["auditory sentence"][design_matrix.columns.get_loc("auditory sentence")] = 1
conditions["vertical checkerboard"][design_matrix.columns.get_loc("vertical checkerboard")] = 1
# t-contrast: auditory sentences minus vertical checkerboard
auditory_minus_visual = conditions["auditory sentence"] - conditions["vertical checkerboard"]
plot_contrast_matrix(auditory_minus_visual , design_matrix=design_matrix)
# BUG FIX: plot_stat_map is used here (and in the cells below) but was never
# imported anywhere in the script.
from nilearn.plotting import plot_stat_map
# effect-size map of the contrast (beta-weight combination)
eff_map = fmri_glm.compute_contrast(
    auditory_minus_visual, output_type="effect_size"
)
# z-scored statistical map of the same contrast
z_map = fmri_glm.compute_contrast(auditory_minus_visual, output_type="z_score")
plot_stat_map(
    z_map,
    bg_img=ref_fmri_data,
    threshold=3.0,
    display_mode="z",
    cut_coords=6,
    black_bg=True,
    title="Auditory minus Visual (Z>3)",
)
plt.show()
from nilearn.glm import threshold_stats_img
# Voxel-level threshold controlling the false-positive rate at p < 0.001
# (uncorrected).
_, threshold = threshold_stats_img(z_map, alpha=0.001, height_control="fpr")
print(f"Uncorrected p<0.001 threshold: {threshold:.3f}")
view_opts = dict(bg_img=ref_fmri_data, display_mode="z",
                 cut_coords=6, black_bg=True)
plot_stat_map(z_map, threshold=threshold,
              title="Auditory minus Visual (p<0.001)", **view_opts)
plt.show()
# Voxel-level threshold with Bonferroni family-wise correction at p < 0.05.
_, threshold = threshold_stats_img(z_map, alpha=0.05, height_control="bonferroni")
print(f"Bonferroni-corrected, p<0.05 threshold: {threshold:.3f}")
plot_stat_map(z_map,
              bg_img=ref_fmri_data,
              threshold=threshold,
              display_mode="z",
              cut_coords=6,
              black_bg=True,
              title="Auditory minus Visual (p<0.05, corrected)")
plt.show()
# Threshold controlling the false-discovery rate at q = 0.05.
_, threshold = threshold_stats_img(z_map, alpha=0.05, height_control="fdr")
print(f"False Discovery rate = 0.05 threshold: {threshold:.3f}")
plot_stat_map(z_map, bg_img=ref_fmri_data, threshold=threshold,
              display_mode="z", cut_coords=6, black_bg=True,
              title="Auditory minus Visual (fdr=0.05)")
plt.show()
# FDR 0.05 plus a minimum cluster extent of 10 voxels
clean_map, threshold = threshold_stats_img(
    z_map, alpha=0.05, height_control="fdr", cluster_threshold=10
)
plot_stat_map(
    clean_map,
    bg_img=ref_fmri_data,
    threshold=threshold,
    display_mode="z",
    cut_coords=6,
    black_bg=True,
    # BUG FIX: the original title said "Visual minus Rest", but z_map is the
    # auditory-minus-visual contrast computed above.
    title="Auditory minus Visual (fdr=0.05), clusters > 10 voxels",
)
plt.show()
from nilearn.reporting import get_clusters_table
# Cluster summary table of the same map.
# NOTE(review): cluster_threshold=20 here differs from the 10 voxels used for
# the map above -- confirm this is intentional.
table = get_clusters_table(
    z_map, stat_threshold=threshold, cluster_threshold=20
)
table
# F-test over the two contrasts of interest (one per row of the matrix)
effects_of_interest = np.vstack(
    (conditions["auditory sentence"], conditions["vertical checkerboard"])
)
plot_contrast_matrix(effects_of_interest, design_matrix)
plt.show()
# z-map of the F contrast
z_map = fmri_glm.compute_contrast(effects_of_interest, output_type="z_score")
# FDR 0.05 with clusters of at least 10 voxels
clean_map, threshold = threshold_stats_img(
    z_map, alpha=0.05, height_control="fdr", cluster_threshold=10
)
view_opts = dict(
    bg_img=ref_fmri_data,
    threshold=threshold,
    display_mode="z",
    cut_coords=10,
    black_bg=True,
    title="Effects of interest (fdr=0.05), clusters > 10 voxels",
)
plot_stat_map(clean_map, **view_opts)
plt.show()
import nilearn
import hcp_utils as hcp
import nibabel as nib
import nilearn.plotting as plotting
import gzip
# if fsaverage is needed, as it is not available in the anat folder:
# either:
# fsaverage = nilearn.datasets.fetch_surf_fsaverage('fsaverage')
# or:
# fsaverage = init_dir+"/derivatives/fmriprep/sourcedata/freesurfer/fsaverage/lh.curv (for instance)
#cifti_func=init_dir+"/derivatives/fmriprep/sub-011/ses-02/func/sub-011_ses-02_task-03ArchiLocalizer_space-fsLR_den-91k_bold.dtseries.nii"
# right-hemisphere BOLD sampled on the subject's native surface,
# plus a smoothed version of the same data
gifti_func=init_dir+"/derivatives/fmriprep/sub-011/ses-02/func/sub-011_ses-02_task-03ArchiLocalizer_hemi-R_space-fsnative_bold.func.gii"
gifti_func_s=init_dir+"/derivatives/fmriprep/sub-011/ses-02/func/my_gii_smoothed2.func.gii"
#cifti = nib.load(cifti_func)
gifti = nib.load(gifti_func)
gifti_s = nib.load(gifti_func_s)
# stack one row per time point: array of shape (n_timepoints, n_vertices)
data_hemiR = np.vstack([darray.data for darray in gifti.darrays])
data_hemiR.shape
data_hemiR_s = np.vstack([darray.data for darray in gifti_s.darrays])
data_hemiR_s.shape
# The gifti file is in the fsnative space, with 138,835 vertices and 317 time points.
from nilearn.plotting import plot_epi, show, plot_surf, plot_surf_stat_map
from nilearn.surface import load_surf_data, load_surf_mesh
from nilearn import plotting, surface
import json
import os
# curvature map of the native right-hemisphere surface (used later as the
# background for surface plots)
fsnative_curv_path=init_dir+"/derivatives/fmriprep/sub-011/ses-02/anat/sub-011_ses-02_hemi-R_curv.shape.gii"
curv = surface.load_surf_data(fsnative_curv_path)
# repetition time (s) from the BIDS sidecar of the preprocessed BOLD run
json_file=init_dir+"/derivatives/fmriprep/sub-011/ses-02/func/sub-011_ses-02_task-03ArchiLocalizer_space-MNI152NLin2009cAsym_desc-preproc_bold.json"
with open(json_file, 'r') as f:
    t_r = json.load(f)['RepetitionTime']
# frame times placed at the middle of each acquisition window
n_scans = data_hemiR.shape[0]
frame_times = t_r * (np.arange(n_scans) + .5)
# desc-confounds_timeseries.tsv from fmriprep
confounds_file=pd.read_csv(init_dir+'/derivatives/fmriprep/sub-011/ses-02/func/sub-011_ses-02_task-03ArchiLocalizer_desc-confounds_timeseries.tsv',sep= '\t')
confounds_file
# Re-load the fmriprep confounds table and rebuild the nuisance regressors
# (24-parameter motion model + 24 aCompCor components) for the surface GLM.
confounds_file = pd.read_csv(
    init_dir + '/derivatives/fmriprep/sub-011/ses-02/func/sub-011_ses-02_task-03ArchiLocalizer_desc-confounds_timeseries.tsv',
    sep='\t')
# 6 rigid-body motion parameters
motion6 = confounds_file[["rot_x", "rot_y", "rot_z", "trans_x", "trans_y", "trans_z"]]
# 24-parameter motion model: parameters + temporal derivatives + squares
# + squared derivatives
motion24 = confounds_file[["rot_x","rot_y","rot_z","trans_x","trans_y","trans_z","rot_x_derivative1","rot_y_derivative1","rot_z_derivative1",
                "trans_x_derivative1","trans_y_derivative1","trans_z_derivative1","rot_x_power2","rot_y_power2","rot_z_power2",
                "trans_x_power2","trans_y_power2","trans_z_power2","rot_x_derivative1_power2","rot_y_derivative1_power2","rot_z_derivative1_power2",
                "trans_x_derivative1_power2","trans_y_derivative1_power2","trans_z_derivative1_power2"]]
# 12 "w_comp_cor" + 12 "c_comp_cor" aCompCor components.
# (Presumably WM- and CSF-mask components -- the naming is specific to this
# dataset; TODO confirm against the confounds file header.)
acompcor = [f"w_comp_cor_{i:02d}" for i in range(12)] + \
           [f"c_comp_cor_{i:02d}" for i in range(12)]
acompcor_reg = confounds_file[acompcor]
# The first row of every derivative column is NaN (no previous volume).
# fillna on a fresh frame replaces the original's chained-assignment write
# (which needed pd.options.mode.chained_assignment = None to stay quiet).
motion24 = motion24.fillna(0)
# The original also aliased this as `result = ...`; the alias is dropped.
nuisance = pd.concat([motion24, acompcor_reg], axis=1)
display(nuisance)
from nilearn.glm.first_level import make_first_level_design_matrix
import json
# repetition time (s) from the BIDS sidecar
json_file=init_dir+"/derivatives/fmriprep/sub-011/ses-02/func/sub-011_ses-02_task-03ArchiLocalizer_space-MNI152NLin2009cAsym_desc-preproc_bold.json"
with open(json_file, 'r') as f:
    t_r = json.load(f)['RepetitionTime']
# frame times placed at the middle of each acquisition window
n_scans = data_hemiR.shape[0]
frame_times = t_r * (np.arange(n_scans) + .5)
# design matrix: task events convolved with the SPM HRF + nuisance regressors
design_matrix = make_first_level_design_matrix(
    frame_times,
    events=events,
    hrf_model='spm',
    add_regs=nuisance,
)
design_matrix.values.shape
plot_design_matrix(design_matrix)
plt.show()
from nilearn.glm.first_level import run_glm
# vertex-wise GLM on the unsmoothed and the smoothed surface data
labels, estimates = run_glm(data_hemiR, design_matrix.values)
labels_s, estimates_s = run_glm(data_hemiR_s, design_matrix.values)
# WATCH OUT: nilearn reorders the condition columns of the design matrix alphabetically!
from nilearn.plotting import plot_contrast_matrix
# One zero-filled contrast vector (length = number of design-matrix columns)
# per experimental condition, for the surface design matrix.
# BUG FIX 1: the original dict listed "left button press, visual instructions"
# twice; the duplicate key silently overwrote the first entry and is removed.
# BUG FIX 2: the original hard-coded indices 0 and 1, assuming
# "auditory sentence" and "vertical checkerboard" are the first two columns.
# nilearn orders the condition columns alphabetically (see the warning just
# above), so index 1 is "horizontal checkerboard"; the column positions are
# now looked up by name instead.
condition_names = [
    "rest",
    "vertical checkerboard",
    "horizontal checkerboard",
    "left button press, visual instructions",
    "left button press, auditory instructions",
    "right button press, visual instructions",
    "right button press, auditory instructions",
    "mental computation, visual instructions",
    "mental computation, auditory instructions",
    "visual sentence",
    "auditory sentence",
]
conditions = {name: np.zeros(design_matrix.shape[1]) for name in condition_names}
conditions["auditory sentence"][design_matrix.columns.get_loc("auditory sentence")] = 1
conditions["vertical checkerboard"][design_matrix.columns.get_loc("vertical checkerboard")] = 1
# t-contrast: auditory sentences minus vertical checkerboard
auditory_minus_visual = conditions["auditory sentence"] - conditions["vertical checkerboard"]
plot_contrast_matrix(auditory_minus_visual , design_matrix=design_matrix)
from nilearn import plotting
from nilearn.glm.contrasts import compute_contrast
# t-contrast on the surface GLM (unsmoothed data)
contrast = compute_contrast(labels, estimates, auditory_minus_visual,
                            contrast_type='t')
contrast_id="Audio_minus_Visual"
# Z-transform of the t map (the original computed this twice; once is enough).
z_score = contrast.z_score()
# plot it on the subject's inflated right-hemisphere mesh, with the curvature
# map as background to give an impression of the cortical folding
surface_path=init_dir+"/derivatives/fmriprep/sub-011/ses-02/anat/sub-011_ses-02_hemi-R_inflated.surf.gii"
#plotting.plot_surf_stat_map(
#    surface_path, z_score, hemi='right',
#    title=contrast_id, colorbar=True,
#    threshold=3., bg_map=fsnative_curv_path)
#fsaverage_path=init_dir+"/derivatives/fmriprep/sourcedata/freesurfer/fsaverage/surf/lh.inflated"
plotting.view_surf(surface_path, z_score, threshold=3., bg_map=fsnative_curv_path)